All cpu bitmasks in Xen now use the cpumask_t type and its operators.
Signed-off-by: Keir Fraser <keir@xensource.com>
{
vcpu_pend_interrupt(dom0->vcpu[0],irq);
}
-
-/////////////////////////////////
-// added 01Apr2005, to accomodate change in xen/sched.h, not clear
-// yet if this functionality is needed on ia64
-#if 0
-static void __synchronise_lazy_execstate(void *unused)
-{
- if ( percpu_ctxt[smp_processor_id()].curr_ed != current )
- {
- __context_switch();
- load_LDT(current);
- clear_segments();
- }
-}
-#endif
-
-void synchronise_lazy_execstate(unsigned long cpuset)
-{
- //smp_subset_call_function(__synchronise_lazy_execstate, NULL, 1, cpuset);
-}
-/////////////////////////////////
//Huh? This seems to be used on ia64 even if !CONFIG_SMP
-void flush_tlb_mask(unsigned long mask)
+void flush_tlb_mask(cpumask_t mask)
{
dummy();
}
//#if CONFIG_SMP || IA64
#if CONFIG_SMP
//Huh? This seems to be used on ia64 even if !CONFIG_SMP
-void smp_send_event_check_mask(unsigned long cpu_mask)
+void smp_send_event_check_mask(cpumask_t mask)
{
dummy();
//send_IPI_mask(cpu_mask, EVENT_CHECK_VECTOR);
//Huh? This seems to be used on ia64 even if !CONFIG_SMP
-int try_flush_tlb_mask(unsigned long mask)
+int try_flush_tlb_mask(cpumask_t mask)
{
dummy();
return 1;
/* calls in xen/common code that are unused on ia64 */
-void sync_lazy_execstate_cpuset(unsigned long cpuset) {}
+void sync_lazy_execstate_cpu(unsigned int cpu) {}
+void sync_lazy_execstate_mask(cpumask_t mask) {}
void sync_lazy_execstate_all(void) {}
int grant_table_create(struct domain *d) { return 0; }
ASSERT(is_idle_task(v->domain));
percpu_ctxt[smp_processor_id()].curr_vcpu = v;
- set_bit(smp_processor_id(), &v->domain->cpuset);
+ cpu_set(smp_processor_id(), v->domain->cpumask);
v->arch.schedule_tail = continue_idle_task;
idle_loop();
}
if ( p->domain != n->domain )
- set_bit(cpu, &n->domain->cpuset);
+ cpu_set(cpu, n->domain->cpumask);
write_ptbase(n);
}
if ( p->domain != n->domain )
- clear_bit(cpu, &p->domain->cpuset);
+ cpu_clear(cpu, p->domain->cpumask);
percpu_ctxt[cpu].curr_vcpu = n;
}
return 1;
}
-void sync_lazy_execstate_cpuset(unsigned long cpuset)
+void sync_lazy_execstate_cpu(unsigned int cpu)
{
- if ( cpuset & (1 << smp_processor_id()) )
+ if ( cpu == smp_processor_id() )
+ (void)__sync_lazy_execstate();
+ else
+ flush_tlb_mask(cpumask_of_cpu(cpu));
+}
+
+void sync_lazy_execstate_mask(cpumask_t mask)
+{
+ if ( cpu_isset(smp_processor_id(), mask) )
(void)__sync_lazy_execstate();
/* Other cpus call __sync_lazy_execstate from flush ipi handler. */
- flush_tlb_mask(cpuset & ~(1 << smp_processor_id()));
+ flush_tlb_mask(mask);
}
void sync_lazy_execstate_all(void)
{
__sync_lazy_execstate();
/* Other cpus call __sync_lazy_execstate from flush ipi handler. */
- flush_tlb_mask(((1<<num_online_cpus())-1) & ~(1 << smp_processor_id()));
+ flush_tlb_mask(cpu_online_map);
}
unsigned long __hypercall_create_continuation(
{
struct vcpu *v;
- BUG_ON(d->cpuset != 0);
+ BUG_ON(!cpus_empty(d->cpumask));
physdev_destroy_state(d);
* may be unnecessary (e.g., page was GDT/LDT) but those
* circumstances should be very rare.
*/
- unsigned long cpuset = tlbflush_filter_cpuset(
- page_get_owner(page)->cpuset, page->tlbflush_timestamp);
+ cpumask_t mask = page_get_owner(page)->cpumask;
+ tlbflush_filter(mask, page->tlbflush_timestamp);
- if ( unlikely(cpuset != 0) )
+ if ( unlikely(!cpus_empty(mask)) )
{
perfc_incrc(need_flush_tlb_flush);
- flush_tlb_mask(cpuset);
+ flush_tlb_mask(mask);
}
/* We lose existing type, back pointer, and validity. */
return okay;
}
-static inline unsigned long vcpuset_to_pcpuset(
- struct domain *d, unsigned long vset)
+static inline cpumask_t vcpumask_to_pcpumask(
+ struct domain *d, unsigned long vmask)
{
- unsigned int vcpu;
- unsigned long pset = 0;
+ unsigned int vcpu_id;
+ /* Must start empty: cpu_set() only ever sets bits, so an
+ * uninitialized cpumask_t would return stack garbage. */
+ cpumask_t pmask = CPU_MASK_NONE;
struct vcpu *v;
- while ( vset != 0 )
+ while ( vmask != 0 )
{
- vcpu = find_first_set_bit(vset);
- vset &= ~(1UL << vcpu);
- if ( (vcpu < MAX_VIRT_CPUS) &&
- ((v = d->vcpu[vcpu]) != NULL) )
- pset |= 1UL << v->processor;
+ vcpu_id = find_first_set_bit(vmask);
+ vmask &= ~(1UL << vcpu_id);
+ if ( (vcpu_id < MAX_VIRT_CPUS) &&
+ ((v = d->vcpu[vcpu_id]) != NULL) )
+ cpu_set(v->processor, pmask);
}
- return pset;
+ return pmask;
}
int do_mmuext_op(
case MMUEXT_TLB_FLUSH_MULTI:
case MMUEXT_INVLPG_MULTI:
{
- unsigned long vset, pset;
- if ( unlikely(get_user(vset, (unsigned long *)op.cpuset)) )
+ unsigned long vmask;
+ cpumask_t pmask;
+ if ( unlikely(get_user(vmask, (unsigned long *)op.vcpumask)) )
{
okay = 0;
break;
}
- pset = vcpuset_to_pcpuset(d, vset);
+ pmask = vcpumask_to_pcpumask(d, vmask);
+ cpus_and(pmask, pmask, d->cpumask);
if ( op.cmd == MMUEXT_TLB_FLUSH_MULTI )
- {
- BUG_ON(shadow_mode_enabled(d) && ((pset & d->cpuset) != (1<<cpu)));
- flush_tlb_mask(pset & d->cpuset);
- }
+ flush_tlb_mask(pmask);
else
- {
- BUG_ON(shadow_mode_enabled(d) && ((pset & d->cpuset) != (1<<cpu)));
- flush_tlb_one_mask(pset & d->cpuset, op.linear_addr);
- }
+ flush_tlb_one_mask(pmask, op.linear_addr);
break;
}
case MMUEXT_TLB_FLUSH_ALL:
- BUG_ON(shadow_mode_enabled(d) && (d->cpuset != (1<<cpu)));
- flush_tlb_mask(d->cpuset);
+ flush_tlb_mask(d->cpumask);
break;
case MMUEXT_INVLPG_ALL:
- BUG_ON(shadow_mode_enabled(d) && (d->cpuset != (1<<cpu)));
- flush_tlb_one_mask(d->cpuset, op.linear_addr);
+ flush_tlb_one_mask(d->cpumask, op.linear_addr);
break;
case MMUEXT_FLUSH_CACHE:
struct vcpu *v = current;
struct domain *d = v->domain;
unsigned int cpu = v->processor;
- unsigned long vset, pset, bmap_ptr;
+ unsigned long vmask, bmap_ptr;
+ cpumask_t pmask;
int rc = 0;
perfc_incrc(calls_to_update_va);
local_flush_tlb();
break;
case UVMF_ALL:
- BUG_ON(shadow_mode_enabled(d) && (d->cpuset != (1<<cpu)));
- flush_tlb_mask(d->cpuset);
+ flush_tlb_mask(d->cpumask);
break;
default:
- if ( unlikely(get_user(vset, (unsigned long *)bmap_ptr)) )
+ if ( unlikely(get_user(vmask, (unsigned long *)bmap_ptr)) )
rc = -EFAULT;
- pset = vcpuset_to_pcpuset(d, vset);
- flush_tlb_mask(pset & d->cpuset);
+ pmask = vcpumask_to_pcpumask(d, vmask);
+ cpus_and(pmask, pmask, d->cpumask);
+ flush_tlb_mask(pmask);
break;
}
break;
local_flush_tlb_one(va);
break;
case UVMF_ALL:
- BUG_ON(shadow_mode_enabled(d) && (d->cpuset != (1<<cpu)));
- flush_tlb_one_mask(d->cpuset, va);
+ flush_tlb_one_mask(d->cpumask, va);
break;
default:
- if ( unlikely(get_user(vset, (unsigned long *)bmap_ptr)) )
+ if ( unlikely(get_user(vmask, (unsigned long *)bmap_ptr)) )
rc = -EFAULT;
- pset = vcpuset_to_pcpuset(d, vset);
- BUG_ON(shadow_mode_enabled(d) && (pset != (1<<cpu)));
- flush_tlb_one_mask(pset & d->cpuset, va);
+ pmask = vcpumask_to_pcpumask(d, vmask);
+ cpus_and(pmask, pmask, d->cpumask);
+ flush_tlb_one_mask(pmask, va);
break;
}
break;
/* Ensure that there are no stale writable mappings in any TLB. */
/* NB. INVLPG is a serialising instruction: flushes pending updates. */
- flush_tlb_one_mask(d->cpuset, l1va);
+ flush_tlb_one_mask(d->cpumask, l1va);
PTWR_PRINTK("[%c] disconnected_l1va at %p now %lx\n",
PTWR_PRINT_WHICH, ptep, pte);
if ( which == PTWR_PT_ACTIVE )
{
l2e_remove_flags(*pl2e, _PAGE_PRESENT);
- flush_tlb_mask(d->cpuset);
+ flush_tlb_mask(d->cpumask);
}
/* Temporarily map the L1 page, and make a copy of it. */
// No TLB flushes are needed the next time this page gets allocated.
//
page->tlbflush_timestamp = 0;
- page->u.free.cpu_mask = 0;
+ page->u.free.cpumask = CPU_MASK_NONE;
if ( type == PGT_l1_shadow )
{
// page table page needs to be vcpu private).
//
#if 0 // this should be enabled for SMP guests...
- flush_tlb_mask(((1<<num_online_cpus()) - 1) & ~(1<<smp_processor_id()));
+ flush_tlb_mask(cpu_online_map);
#endif
need_flush = 1;
#include <mach_ipi.h>
static spinlock_t flush_lock = SPIN_LOCK_UNLOCKED;
-static unsigned long flush_cpumask, flush_va;
+static cpumask_t flush_cpumask;
+static unsigned long flush_va;
asmlinkage void smp_invalidate_interrupt(void)
{
else
local_flush_tlb_one(flush_va);
}
- clear_bit(smp_processor_id(), &flush_cpumask);
+ cpu_clear(smp_processor_id(), flush_cpumask);
}
-void __flush_tlb_mask(unsigned long mask, unsigned long va)
+void __flush_tlb_mask(cpumask_t mask, unsigned long va)
{
ASSERT(local_irq_is_enabled());
- if ( mask & (1UL << smp_processor_id()) )
+ if ( cpu_isset(smp_processor_id(), mask) )
{
local_flush_tlb();
- mask &= ~(1UL << smp_processor_id());
+ cpu_clear(smp_processor_id(), mask);
}
- if ( mask != 0 )
+ if ( !cpus_empty(mask) )
{
spin_lock(&flush_lock);
flush_cpumask = mask;
flush_va = va;
- {
- cpumask_t _mask;
- cpus_addr(_mask)[0] = mask;
- send_IPI_mask(_mask, INVALIDATE_TLB_VECTOR);
- }
- while ( flush_cpumask != 0 )
+ send_IPI_mask(mask, INVALIDATE_TLB_VECTOR);
+ while ( !cpus_empty(flush_cpumask) )
cpu_relax();
spin_unlock(&flush_lock);
}
if ( num_online_cpus() > 1 )
{
spin_lock(&flush_lock);
- flush_cpumask = (1UL << num_online_cpus()) - 1;
- flush_cpumask &= ~(1UL << smp_processor_id());
- flush_va = FLUSHVA_ALL;
+ flush_cpumask = cpu_online_map;
+ flush_va = FLUSHVA_ALL;
send_IPI_allbutself(INVALIDATE_TLB_VECTOR);
- while ( flush_cpumask != 0 )
+ cpu_clear(smp_processor_id(), flush_cpumask);
+ while ( !cpus_empty(flush_cpumask) )
cpu_relax();
spin_unlock(&flush_lock);
}
local_flush_tlb_pge();
}
-void smp_send_event_check_mask(unsigned long cpu_mask)
+void smp_send_event_check_mask(cpumask_t mask)
{
- cpumask_t mask;
- cpu_mask &= ~(1UL << smp_processor_id());
- cpus_addr(mask)[0] = cpu_mask;
- if ( cpu_mask != 0 )
+ cpu_clear(smp_processor_id(), mask);
+ if ( !cpus_empty(mask) )
send_IPI_mask(mask, EVENT_CHECK_VECTOR);
}
struct call_data_struct {
void (*func) (void *info);
void *info;
- unsigned long started;
- unsigned long finished;
- int wait;
+ atomic_t started;
+ atomic_t finished;
};
static spinlock_t call_lock = SPIN_LOCK_UNLOCKED;
void (*func) (void *info), void *info, int unused, int wait)
{
struct call_data_struct data;
- unsigned long cpuset;
+ unsigned int nr_cpus = num_online_cpus() - 1;
ASSERT(local_irq_is_enabled());
- cpuset = ((1UL << num_online_cpus()) - 1) & ~(1UL << smp_processor_id());
- if ( cpuset == 0 )
+ if ( nr_cpus == 0 )
return 0;
data.func = func;
data.info = info;
- data.started = data.finished = 0;
- data.wait = wait;
+ atomic_set(&data.started, 0);
+ atomic_set(&data.finished, 0);
spin_lock(&call_lock);
send_IPI_allbutself(CALL_FUNCTION_VECTOR);
- while ( (wait ? data.finished : data.started) != cpuset )
+ while ( atomic_read(wait ? &data.finished : &data.started) != nr_cpus )
cpu_relax();
spin_unlock(&call_lock);
ack_APIC_irq();
perfc_incrc(ipis);
- if ( call_data->wait )
- {
- (*func)(info);
- mb();
- set_bit(smp_processor_id(), &call_data->finished);
- }
- else
- {
- mb();
- set_bit(smp_processor_id(), &call_data->started);
- (*func)(info);
- }
+ mb();
+ atomic_inc(&call_data->started);
+
+ (*func)(info);
+
+ mb();
+ atomic_inc(&call_data->finished);
}
while ( test_bit(_VCPUF_running, &v->vcpu_flags) )
cpu_relax();
- sync_lazy_execstate_cpuset(d->cpuset);
- BUG_ON(d->cpuset != 0);
+ sync_lazy_execstate_mask(d->cpumask);
+ BUG_ON(!cpus_empty(d->cpumask));
sync_pagetable_state(d);
flush++;
if ( flush == 1 )
- flush_tlb_one_mask(current->domain->cpuset, va);
+ flush_tlb_one_mask(current->domain->cpumask, va);
else if ( flush != 0 )
- flush_tlb_mask(current->domain->cpuset);
+ flush_tlb_mask(current->domain->cpumask);
return 0;
}
flush++;
if ( flush == 1 )
- flush_tlb_one_mask(current->domain->cpuset, va);
+ flush_tlb_one_mask(current->domain->cpumask, va);
else if ( flush != 0 )
- flush_tlb_mask(current->domain->cpuset);
+ flush_tlb_mask(current->domain->cpumask);
return 0;
}
struct pfn_info *alloc_domheap_pages(struct domain *d, unsigned int order)
{
struct pfn_info *pg;
- unsigned long mask = 0;
+ cpumask_t mask;
int i;
ASSERT(!in_irq());
if ( unlikely((pg = alloc_heap_pages(MEMZONE_DOM, order)) == NULL) )
return NULL;
- for ( i = 0; i < (1 << order); i++ )
+ mask = pg->u.free.cpumask;
+ tlbflush_filter(mask, pg->tlbflush_timestamp);
+
+ pg->count_info = 0;
+ pg->u.inuse._domain = 0;
+ pg->u.inuse.type_info = 0;
+
+ for ( i = 1; i < (1 << order); i++ )
{
- mask |= tlbflush_filter_cpuset(
- pg[i].u.free.cpu_mask & ~mask, pg[i].tlbflush_timestamp);
+ /* Add in any extra CPUs that need flushing because of this page. */
+ cpumask_t extra_cpus_mask;
+ cpus_andnot(extra_cpus_mask, pg[i].u.free.cpumask, mask);
+ tlbflush_filter(extra_cpus_mask, pg[i].tlbflush_timestamp);
+ cpus_or(mask, mask, extra_cpus_mask);
pg[i].count_info = 0;
pg[i].u.inuse._domain = 0;
pg[i].u.inuse.type_info = 0;
}
- if ( unlikely(mask != 0) )
+ if ( unlikely(!cpus_empty(mask)) )
{
perfc_incrc(need_flush_tlb_flush);
flush_tlb_mask(mask);
ASSERT(((pg[i].u.inuse.type_info & PGT_count_mask) == 0) ||
shadow_tainted_refcnts(d));
pg[i].tlbflush_timestamp = tlbflush_current_time();
- pg[i].u.free.cpu_mask = d->cpuset;
+ pg[i].u.free.cpumask = d->cpumask;
list_del(&pg[i].list);
}
while ( test_bit(_VCPUF_running, &v->vcpu_flags) && !domain_runnable(v) )
cpu_relax();
- sync_lazy_execstate_cpuset(v->domain->cpuset & (1UL << v->processor));
+ if ( cpu_isset(v->processor, v->domain->cpumask) )
+ sync_lazy_execstate_cpu(v->processor);
}
void domain_wake(struct vcpu *v)
extern u32 tlbflush_time[NR_CPUS];
#define tlbflush_current_time() tlbflush_clock
-#define tlbflush_filter_cpuset(x,y) (0)
+#define tlbflush_filter(x,y) ((void)0)
#define NEED_FLUSH(x, y) (0)
#endif
}
/*
- * Filter the given set of CPUs, returning only those that may not have
- * flushed their TLBs since @page_timestamp.
+ * Filter the given set of CPUs, removing those that definitely flushed their
+ * TLB since @page_timestamp.
*/
-static inline unsigned long tlbflush_filter_cpuset(
- unsigned long cpuset, u32 page_timestamp)
-{
- int i;
- unsigned long remain;
-
- for ( i = 0, remain = ~0UL; (cpuset & remain) != 0; i++, remain <<= 1 )
- {
- if ( (cpuset & (1UL << i)) &&
- !NEED_FLUSH(tlbflush_time[i], page_timestamp) )
- cpuset &= ~(1UL << i);
- }
-
- return cpuset;
-}
+#define tlbflush_filter(mask, page_timestamp) \
+do { \
+ unsigned int cpu; \
+ for_each_cpu_mask ( cpu, mask ) \
+ if ( !NEED_FLUSH(tlbflush_time[cpu], page_timestamp) ) \
+ cpu_clear(cpu, mask); \
+} while ( 0 )
extern void new_tlbflush_clock_period(void);
#define local_flush_tlb_one(__addr) \
__asm__ __volatile__("invlpg %0": :"m" (*(char *) (__addr)))
-#define flush_tlb_all() flush_tlb_mask((1 << num_online_cpus()) - 1)
+#define flush_tlb_all() flush_tlb_mask(cpu_online_map)
#ifndef CONFIG_SMP
-#define flush_tlb_all_pge() local_flush_tlb_pge()
-#define flush_tlb_mask(_mask) local_flush_tlb()
-#define flush_tlb_one_mask(_mask,_v) local_flush_tlb_one(_v)
+#define flush_tlb_all_pge() local_flush_tlb_pge()
+#define flush_tlb_mask(mask) local_flush_tlb()
+#define flush_tlb_one_mask(mask,v) local_flush_tlb_one(v)
#else
#include <xen/smp.h>
#define FLUSHVA_ALL (~0UL)
extern void flush_tlb_all_pge(void);
-extern void __flush_tlb_mask(unsigned long mask, unsigned long va);
-#define flush_tlb_mask(_mask) __flush_tlb_mask(_mask,FLUSHVA_ALL)
-#define flush_tlb_one_mask(_mask,_v) __flush_tlb_mask(_mask,_v)
+extern void __flush_tlb_mask(cpumask_t mask, unsigned long va);
+#define flush_tlb_mask(mask) __flush_tlb_mask(mask,FLUSHVA_ALL)
+#define flush_tlb_one_mask(mask,v) __flush_tlb_mask(mask,v)
#endif
#endif /* __FLUSHTLB_H__ */
#define __ASM_X86_MM_H__
#include <xen/config.h>
+#include <xen/cpumask.h>
#include <xen/list.h>
#include <asm/io.h>
#include <asm/uaccess.h>
/* Page is on a free list: ((count_info & PGC_count_mask) == 0). */
struct {
/* Mask of possibly-tainted TLBs. */
- u32 cpu_mask;
+ cpumask_t cpumask;
/* Order-size of the free chunk this page is the head of. */
u8 order;
} PACKED free;
* linear_addr: Linear address to be flushed from the local TLB.
*
* cmd: MMUEXT_TLB_FLUSH_MULTI
- * cpuset: Pointer to bitmap of VCPUs to be flushed.
+ * vcpumask: Pointer to bitmap of VCPUs to be flushed.
*
* cmd: MMUEXT_INVLPG_MULTI
* linear_addr: Linear address to be flushed.
- * cpuset: Pointer to bitmap of VCPUs to be flushed.
+ * vcpumask: Pointer to bitmap of VCPUs to be flushed.
*
* cmd: MMUEXT_TLB_FLUSH_ALL
* No additional arguments. Flushes all VCPUs' TLBs.
/* SET_LDT */
unsigned int nr_ents;
/* TLB_FLUSH_MULTI, INVLPG_MULTI */
- void *cpuset;
+ void *vcpumask;
};
};
#endif
/* These are passed as 'flags' to update_va_mapping. They can be ORed. */
/* When specifying UVMF_MULTI, also OR in a pointer to a CPU bitmap. */
/* UVMF_LOCAL is merely UVMF_MULTI with a NULL bitmap pointer. */
-#define UVMF_NONE (0UL) /* No flushing at all. */
+#define UVMF_NONE (0UL<<0) /* No flushing at all. */
#define UVMF_TLB_FLUSH (1UL<<0) /* Flush entire TLB(s). */
#define UVMF_INVLPG (2UL<<0) /* Flush only one entry. */
#define UVMF_FLUSHTYPE_MASK (3UL<<0)
-#define UVMF_MULTI (0UL<<1) /* Flush subset of TLBs. */
+#define UVMF_MULTI (0UL<<2) /* Flush subset of TLBs. */
#define UVMF_LOCAL (0UL<<2) /* Flush local TLB. */
#define UVMF_ALL (1UL<<2) /* Flush all TLBs. */
struct vcpu *vcpu[MAX_VIRT_CPUS];
- /* Bitmask of CPUs on which this domain is running. */
- unsigned long cpuset;
+ /* Bitmask of CPUs which are holding onto this domain's state. */
+ cpumask_t cpumask;
struct arch_domain arch;
};
* Force loading of currently-executing domain state on the specified set
* of CPUs. This is used to counteract lazy state switching where required.
*/
-extern void sync_lazy_execstate_cpuset(unsigned long cpuset);
+extern void sync_lazy_execstate_cpu(unsigned int cpu);
+extern void sync_lazy_execstate_mask(cpumask_t mask);
extern void sync_lazy_execstate_all(void);
extern int __sync_lazy_execstate(void);
*/
extern void smp_send_stop(void);
-extern void smp_send_event_check_mask(unsigned long cpu_mask);
-#define smp_send_event_check_cpu(_cpu) smp_send_event_check_mask(1<<(_cpu))
+extern void smp_send_event_check_mask(cpumask_t mask);
+#define smp_send_event_check_cpu(cpu) \
+ smp_send_event_check_mask(cpumask_of_cpu(cpu))
/*
* Prepare machine for booting other CPUs.
* These macros fold the SMP functionality into a single CPU system
*/
-#define smp_send_event_check_mask(_m) ((void)0)
-#define smp_send_event_check_cpu(_p) ((void)0)
+#define smp_send_event_check_mask(m) ((void)0)
+#define smp_send_event_check_cpu(p) ((void)0)
#ifndef __smp_processor_id
#define smp_processor_id() 0
#endif